You are provided with a training set and a test set of images of 3 classes. Each image has a filename that is its unique id. The dataset comprises 3 classes: COVID-19, Viral Pneumonia, and Normal. The goal of the project is to create a classifier capable of determining the class of the X-ray image.
# Clear Session
from keras import backend
backend.clear_session()
import PIL
import cv2
import math
import keras
import warnings
import random
import pprint
from IPython.display import clear_output
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
import seaborn as sns
import tensorflow as tf
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorflow as tf
import sklearn.metrics as metrics
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
from keras import losses
from keras.models import Sequential
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import img_to_array
from tensorflow.keras.utils import load_img
from keras.layers import BatchNormalization
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import LeakyReLU
from keras import optimizers
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelBinarizer
from PIL import Image
from matplotlib import pyplot
from sklearn.metrics import accuracy_score, confusion_matrix
warnings.filterwarnings('ignore')
# SAS-signed Azure blob URLs for the reference thumbnails that
# PlotDataDistribution pins onto its bars.
# NOTE(review): the signatures embed expiry dates (`se=...`) and may no
# longer resolve — confirm before relying on these fetches.
images_urls = {
"lungs" : "https://pgaimlimages.blob.core.windows.net/images/lungs.png?sp=r&st=2022-05-29T20:30:27Z&se=2022-08-01T04:30:27Z&spr=https&sv=2020-08-04&sr=b&sig=dxvKykbbzm%2BD48X0sAWu4laEqHMDbYUg1Stm1qtElww%3D",
"Covid" : "https://pgaimlimages.blob.core.windows.net/images/Covid.png?sp=r&st=2022-05-29T20:34:55Z&se=2022-08-01T04:34:55Z&spr=https&sv=2020-08-04&sr=b&sig=WWdb9XsWaVsacmh8LRX8ltIxR2446mJdFj9rD0OacD0%3D",
"Normal" : "https://pgaimlimages.blob.core.windows.net/images/Normal.png?sp=r&st=2022-05-29T20:35:23Z&se=2022-05-30T04:35:23Z&spr=https&sv=2020-08-04&sr=b&sig=XXxAwNfhy4B7HPEbW0LM0Uf82cCLROcz23jyPnaGscA%3D",
"Viral Pneumonia" : "https://pgaimlimages.blob.core.windows.net/images/Viral%20Pneumonia.png?sp=r&st=2022-05-29T20:35:40Z&se=2022-05-30T04:35:40Z&spr=https&sv=2020-08-04&sr=b&sig=kEdgWJvgJfBAXD1ydeZJ97jtN21Io2y%2Bq74nCwkKBcg%3D"
}
# Load the raw image stack and the matching per-image labels from local files.
images_data = np.load("CovidImages.npy")
images_labels = pd.read_csv("CovidLabels.csv")
# Inspect the shape of a single image (notebook cell output shows (128, 128, 3)).
images_data[0].shape
(128, 128, 3)
def CreateImageLabelsDataFrame(labels_df, images_ndarry):
    """Combine the label DataFrame and the image array into one DataFrame.

    A copy of `labels_df` gains an 'Image_Matrix' column holding each image
    as nested Python lists, and 'Label' is cast to pandas' string dtype.
    The input DataFrame is not modified.
    """
    combined = labels_df.copy()
    combined['Image_Matrix'] = pd.Series(images_ndarry.tolist())
    combined['Label'] = combined['Label'].astype("string")
    return combined
def info(dataFrame):
    """Print the frame's dimensions and display a styled per-column summary
    of missing-value counts, missing-value percentage, and dtype.

    The 'Missing Values %' column is shaded with a red gradient and
    formatted to two decimals.
    """
    print(f"{dataFrame.shape[0]} Rows x {dataFrame.shape[1]} Columns")
    n_missing = dataFrame.isna().sum()        # empty cells per column
    n_present = dataFrame.notnull().sum()     # filled cells per column
    pct_missing = ((n_missing * 100) / (n_missing + n_present)).astype(float)
    dtypes = dataFrame.dtypes                 # dtype per column
    feature_names = ["No. Of Missing Values", "Missing Values %", "Data Type"]
    summary = pd.concat(
        objs=[n_missing, pct_missing, dtypes],
        axis=1,
        keys=feature_names,
        sort=True)
    cm = sns.light_palette("red", as_cmap=True)
    display(summary.style.background_gradient(cmap=cm, subset=pd.IndexSlice[:, feature_names[1]]).format(formatter={(feature_names[1]): "{:.2f}%"}))
# Build the working DataFrame (labels + image matrices) and summarise it.
main_data = CreateImageLabelsDataFrame(images_labels, images_data)
info(main_data)
251 Rows x 2 Columns
| No. Of Missing Values | Missing Values % | Data Type | |
|---|---|---|---|
| Image_Matrix | 0 | 0.00% | object |
| Label | 0 | 0.00% | string |
def ExploreDataDistributionByColumn(df, category):
    """Summarise the distribution of one categorical column.

    Returns a DataFrame with one row per unique value of `df[category]`
    (in order of first appearance): a running id, the value itself, its
    row count, and its percentage share formatted as a string.
    """
    unique_values = df[category].unique()
    total = df[category].count()  # non-null rows only; invariant across the loop
    ids = []
    counts = []
    percentages = []
    for position, value in enumerate(unique_values):
        value_count = df[df[category] == value][category].count()
        ids.append(position)
        counts.append(value_count)
        percentages.append(f"{str(round((value_count/total) * 100, 2))}%")
    return pd.DataFrame({
        "id" : ids,
        f"{category} Name" : unique_values,
        "Count" : counts,
        "Category %" : percentages
    })
# Tabulate how many images fall into each label and their percentage share.
ExploreDataDistributionByColumn(
    df = main_data,
    category = "Label")
| id | Label Name | Count | Category % | |
|---|---|---|---|---|
| 0 | 0 | Viral Pneumonia | 70 | 27.89% |
| 1 | 1 | Covid | 111 | 44.22% |
| 2 | 2 | Normal | 70 | 27.89% |
def PlotDataDistribution(df):
    """Bar chart of image counts per label, annotated with the class name,
    percentage share, and a decorative reference thumbnail per bar.

    NOTE(review): depends on the module-level `images_urls` dict; each bar
    triggers a remote fetch via plt.imread, and the SAS URLs may have expired.
    NOTE(review): assumes ax.patches come back in the same order as
    df["Label"].unique() — confirm, since the plotting library may order
    bars differently from first-appearance order.
    """
    color_palette = {'Viral Pneumonia': 'dodgerblue', 'Covid': 'crimson', 'Normal': 'limegreen'}
    fig, ax = plt.subplots(figsize=(24,15))
    sns.countplot(x="Label", data=df, palette=color_palette, ax=ax)
    ax.set_xlabel("X-Ray Image Category", fontsize=14)
    ax.set_ylabel("Number of Images", fontsize=14)
    ax.set_title("Lungs X-ray Images Count", fontsize=20)
    # First pass: sum bar heights to get the total number of images.
    total = 0
    for p in ax.patches:
        if p.get_height() > 0:
            total += p.get_height()
    label_index = 0
    labels = df["Label"].unique()
    # Second pass: write the label and percentage inside each bar, and pin
    # the class thumbnail near the bar's base.
    for p in ax.patches:
        if p.get_height() > 0:
            percentage = str(((p.get_height() / total) * 100).round(1))+ '%'
            txt_x = p.get_x() + p.get_width()/4
            txt_y = p.get_height() - p.get_height()/4
            ax.text(txt_x,txt_y-8, labels[label_index], color="white", weight="medium", fontsize="30")
            ax.text(txt_x,txt_y, f"{percentage} ({p.get_height()})", color="white", weight="medium", fontsize="30")
            label = labels[label_index]
            img = plt.imread(images_urls[label])  # remote fetch of the class thumbnail
            im = OffsetImage(img, zoom=0.25)
            im.image.axes = ax
            x = p.get_x() + p.get_width()/2
            x_offset = 0
            ab = AnnotationBbox(im, (x, 10), xybox=(x_offset, 0), frameon=False,
                xycoords='data', boxcoords="offset points", pad=0)
            ax.add_artist(ab)
            label_index = label_index + 1
    plt.show()
PlotDataDistribution(main_data)
Viral Pneumonia: A normal lung is mostly air filled and should appear black in the lung area. Pneumonia-filled lungs have liquid in the lower part of the lungs, increasing the density of that part of the lungs and making it appear light grey/whitish, since the density will be close to that of soft tissue.
Covid Lungs: Covid affects the air sacs causing them to stiffen/fill with fluid. This increases the density in those areas thus we will see light grey/white spots in those parts of the lungs where the air sacs are.
Normal Lungs: Normal lungs should be air filled and thus will show a darker/black color outlining the lungs.

count_label = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
def DisplayImageCategoryPlot(df):
    """Show the first image of each label side by side in a 1x3 grid.

    Subplot titles carry a letter tag from the module-level `count_label`
    list followed by the label name.
    """
    unique_names = df['Label'].unique()
    first_images = [df[df['Label'] == name].head(1)['Image_Matrix'].tolist()[0]
                    for name in unique_names]
    grouped_images_df = pd.DataFrame({"Label": unique_names, "Image_Matrix": first_images})
    fig, axes = plt.subplots(1, 3, figsize=(16, 6))
    # Fill at most three panels; extra panels stay blank if fewer labels exist.
    for image_index in range(min(3, len(unique_names))):
        axes[image_index].imshow(grouped_images_df['Image_Matrix'][image_index])
        axes[image_index].set_title(f"({count_label[image_index]}) {grouped_images_df['Label'][image_index]}")
    plt.show()
def ExploreXRayImages(data, categor_name):
    """Plot a 3x3 grid of randomly sampled X-ray images for one category.

    Each title shows a letter tag from `count_label`, the category name,
    and the sampled row index. Uses the global `random` state, so repeated
    calls show different images.
    """
    images = data[data['Label'] == categor_name]['Image_Matrix'].tolist()
    fig, axes = plt.subplots(3, 3, figsize=(16, 14))
    for count in range(9):
        row, col = divmod(count, 3)  # walk the grid row-major, like the original nested loops
        image_index = random.randrange(len(images))
        axes[row][col].imshow(images[image_index])
        axes[row][col].set_title(f"({count_label[count]}) {categor_name} X-ray at index={image_index}")
    plt.show()
# Show one sample per class, then a random grid of Viral Pneumonia X-rays.
DisplayImageCategoryPlot(main_data)
ExploreXRayImages(main_data, "Viral Pneumonia")
We also see brightness on the edges of the lungs.
The major difference between the Covid lungs and the Pneumonia lungs is that the whiteness in Covid images appears in smaller chunks and lines across most of the lungs, whereas the Pneumonia X-rays show most of their whiteness in large areas, mostly at the bottom of the lungs.
# Random sample grids for the remaining two classes.
ExploreXRayImages(main_data, "Covid")
ExploreXRayImages(main_data, "Normal")
def BeforeAfter(before_data, after_data, tranformation_name, xray_category):
    """Show three random before/after image pairs for one X-ray category.

    The same randomly drawn index is used for both columns of each row,
    so each pair displays the same underlying image before and after the
    transformation. Assumes both frames share row ordering per category.
    """
    before_images = before_data[before_data['Label'] == xray_category]['Image_Matrix'].tolist()
    after_images = after_data[after_data['Label'] == xray_category]['Image_Matrix'].tolist()
    fig, axes = plt.subplots(3, 2, figsize=(45, 45))
    for count in range(3):
        image_index = random.randrange(len(before_images))
        axes[count][0].imshow(before_images[image_index])
        axes[count][0].set_title(f"({count_label[count]}) Before: {tranformation_name} at index={image_index}", fontsize=40)
        axes[count][1].imshow(after_images[image_index])
        axes[count][1].set_title(f"({count_label[count]}) After: {tranformation_name} at index={image_index}", fontsize=40)
    plt.show()
def Blur(img):
    """Return `img` Gaussian-blurred with a 5x5 kernel and sigmaX=5.

    The input is first cast to uint8 via a PIL image, then converted to
    OpenCV's BGR channel order before blurring; the blurred BGR array is
    returned as-is.
    """
    as_uint8 = np.asarray(img).astype(np.uint8)
    pil_image = Image.fromarray(as_uint8)
    bgr_image = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
    return cv2.GaussianBlur(bgr_image, ksize=(5, 5), sigmaX=5)
# Apply Gaussian blurring to every image, then compare before/after per class.
main_data_transformed = main_data.copy()
main_data_transformed['Image_Matrix'] = main_data_transformed['Image_Matrix'].apply(Blur)
DisplayImageCategoryPlot(main_data_transformed)
BeforeAfter(main_data, main_data_transformed, "Gaussian Blurring", "Viral Pneumonia")
ExploreXRayImages(main_data_transformed, "Viral Pneumonia")
BeforeAfter(main_data, main_data_transformed, "Gaussian Blurring", "Covid")
ExploreXRayImages(main_data_transformed, "Covid")
BeforeAfter(main_data, main_data_transformed, "Gaussian Blurring", "Normal")
ExploreXRayImages(main_data_transformed, "Normal")
def CreateAndGetModelData(data, transformer=None, should_transform=False):
    """Turn the image/label DataFrame into normalized, encoded train/test arrays.

    Parameters:
        data: DataFrame with 'Image_Matrix' and 'Label' columns.
        transformer: optional callable applied to each image matrix when
            `should_transform` is True. (Default changed from the original
            string sentinel str("NaN") — which was never callable — to None.)
        should_transform: whether to apply `transformer` before splitting.

    Returns a dict with normalized float32 x_train/x_test (scaled to [0, 1]),
    one-hot y_train/y_test, and the fitted LabelBinarizer under 'encoder'.
    """
    data_temp = data.copy()
    if should_transform:
        data_temp['Image_Matrix'] = data_temp['Image_Matrix'].apply(transformer)
    print("===============Input Data===============")
    X = np.asarray(data_temp['Image_Matrix'].values.tolist())
    y = np.asarray(data_temp[['Label']].values.tolist())
    print(f"X Data Shape: {X.shape}")
    print(f"Y Data Shape: {y.shape}\n")
    print("===============Split Data===================")
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
    print(f"x_train shape: {x_train.shape}, y_train shape: {y_train.shape}")
    print(f"x_test shape: {x_test.shape}, y_test shape: {y_test.shape}\n")
    print("==============Reshape the Data=============")
    x_train = x_train.reshape(x_train.shape[0], 128, 128, 3)
    x_test = x_test.reshape(x_test.shape[0], 128, 128, 3)
    print(f"x_train shape: {x_train.shape}, y_train shape: {y_train.shape}")
    print(f"x_test shape: {x_test.shape}, y_test shape: {y_test.shape}\n")
    print("============Normalize the Data===============")
    x_train_normalized = x_train.astype('float32')/255.0
    x_test_normalized = x_test.astype('float32')/255.0
    print(f"x_train_normalized shape: {x_train_normalized.shape}")
    print(f"x_test_normalized shape: {x_test_normalized.shape}\n")
    print("==========Encode Target Variables============")
    label_binarizer = LabelBinarizer()
    y_train_encoded = label_binarizer.fit_transform(y_train)
    # Bug fix: use transform (not fit_transform) so the test labels reuse the
    # class ordering learned from the training split; refitting on the test
    # labels could silently reorder/shrink the classes if a class were absent.
    y_test_encoded = label_binarizer.transform(y_test)
    print(f"y_train_encoded shape: {y_train_encoded.shape}")
    print(f"y_test_encoded shape: {y_test_encoded.shape}\n")
    return {
        "x_train": x_train_normalized,
        "y_train": y_train_encoded,
        "x_test" : x_test_normalized,
        "y_test" : y_test_encoded,
        'encoder': label_binarizer
    }
# Accumulator for per-run evaluation metrics: each GetBaseModel call appends
# one entry to every list, so Scores() can render the runs side by side.
model_evaluation_scores = {
    "Hidden Layer Activation" : [],
    "Output Layer Activation" : [],
    "Loss" : [],
    "Weight Initialisation" : [],
    "Max Accuracy Train" : [],
    "Max Accuracy Test" : [],
    "Max Recall Train" : [],
    "Max Recall Test" : []
}
def GetBaseModel(current_optimiser, data, activation_funtion, scores, initializer='random_normal'):
    """Build, train (50 epochs, 20% validation split) and score the base CNN.

    Fixes vs. the original:
      - `current_optimiser` is now actually passed to compile(); it was
        accepted but ignored in favour of a hard-coded "adam".
      - `initializer` is now applied as kernel_initializer to the conv/dense
        layers; it was recorded in `scores` but never used by the model.
      - the "Loss" entry records the loss actually compiled,
        'categorical_crossentropy' (it wrongly logged 'binary_crossentropy').

    Parameters:
        current_optimiser: optimizer name or instance passed to compile().
        data: dict from CreateAndGetModelData ('x_train', 'y_train', ...).
        activation_funtion: activation for the hidden conv/dense layers.
        scores: metrics accumulator dict; mutated in place and also returned.
        initializer: kernel initializer for the hidden layers.

    Returns a dict with the fitted 'Model', its per-epoch 'History'
    DataFrame, and the updated 'Stats' accumulator.
    """
    backend.clear_session()
    model = keras.Sequential()
    model.add(Conv2D(64, (3, 3), activation=activation_funtion, padding="same",
                     kernel_initializer=initializer, input_shape=(128, 128, 3)))
    model.add(MaxPooling2D((2, 2), padding = 'same'))
    model.add(Conv2D(32, (3, 3), activation=activation_funtion, padding="same",
                     kernel_initializer=initializer))
    model.add(MaxPooling2D((2, 2), padding = 'same'))
    model.add(BatchNormalization())
    model.add(Conv2D(16, (3, 3), activation=activation_funtion, padding="same",
                     kernel_initializer=initializer))
    model.add(Flatten())
    model.add(Dense(32, activation=activation_funtion, kernel_initializer=initializer))
    model.add(Dropout(0.25))
    model.add(Dense(16, activation=activation_funtion, kernel_initializer=initializer))
    model.add(Dense(3, activation='softmax'))
    model.compile(
        loss="categorical_crossentropy",
        optimizer=current_optimiser,
        metrics=['accuracy', 'FalsePositives', 'TruePositives', 'Recall'])
    history = model.fit(
        data['x_train'],
        data['y_train'],
        epochs=50,
        validation_split=0.2,
        verbose=1)
    model.summary()
    print(model.metrics_names)
    history = pd.DataFrame.from_dict(history.history)
    history_stats = history.describe().T
    scores["Hidden Layer Activation"].append(activation_funtion)
    scores["Output Layer Activation"].append("Softmax")
    scores["Max Accuracy Train"].append(history_stats.loc['accuracy']['max'])
    scores["Max Accuracy Test"].append(history_stats.loc['val_accuracy']['max'])
    scores["Max Recall Train"].append(history_stats.loc['recall']['max'])
    scores["Max Recall Test"].append(history_stats.loc['val_recall']['max'])
    scores["Weight Initialisation"].append(initializer)
    # Record the loss actually used at compile time.
    scores["Loss"].append("categorical_crossentropy")
    return {
        'Model' : model,
        'History' : history,
        "Stats" : scores
    }
def Scores(dic):
    """Return the score dictionary as a transposed DataFrame (one row per metric)."""
    frame = pd.DataFrame.from_dict(dic)
    return frame.T
def GetModelPredictions(model, data):
    """Predict on the held-out test set, round probabilities to 0/1,
    print a classification report against the one-hot truth, and return
    the rounded prediction array."""
    predictions = np.round(model.predict(data['x_test']), 0)
    print(classification_report(data['y_test'], predictions))
    return predictions
def DisplayConfusionMatrix(y_actual, y_predict, encoder, label):
    """Render a 3x3 confusion-matrix heatmap for one-hot truth/predictions.

    Each cell is annotated with its raw count and its share of all samples;
    axis tick labels come from the fitted encoder's classes_.
    """
    true_classes = np.argmax(y_actual, axis=1)
    pred_classes = np.argmax(y_predict, axis=1)
    cm = confusion_matrix(true_classes, pred_classes)
    fig, ax = plt.subplots(figsize=(12,8))
    total = np.sum(cm)
    annotations = np.array([
        "{0:0.0f}".format(count) + "\n" + "{0:.2%}".format(count / total)
        for count in cm.flatten()
    ]).reshape(3, 3)
    sns.heatmap(data=cm, annot=annotations, fmt='', xticklabels=encoder.classes_, yticklabels=encoder.classes_)
    plt.title(label, fontsize=14)
    plt.ylabel("True Label", fontsize=14)
    plt.xlabel("Predicted Label", fontsize=14)
    plt.show()
def ShowModelStats(history, actual_y, predictions_y, label, encoder):
    """Render four evaluation charts for one training run: confusion matrix,
    then accuracy, loss, and recall curves (train vs. validation per epoch).

    `history` is the per-epoch metrics DataFrame produced by GetBaseModel;
    `actual_y`/`predictions_y` are one-hot arrays; `encoder` supplies the
    class names for the confusion-matrix ticks.
    """
    # chart 1
    DisplayConfusionMatrix(actual_y, predictions_y, encoder, label)
    # chart 2
    plt.figure(figsize= (10,7))
    plt.plot(history['accuracy'])
    plt.plot(history['val_accuracy'])
    # NOTE(review): "Accurracy" is a typo in the displayed title (runtime string,
    # left unchanged here).
    plt.title(f'Model Accurracy Summary: {label}', fontsize=14)
    plt.ylabel('Score', fontsize=14)
    plt.xlabel('epoch', fontsize=14)
    plt.legend(['accuracy', 'val_accuracy'], loc='best')
    plt.show()
    # chart 3
    plt.figure(figsize= (10,7))
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    plt.title(f'Model Loss Summary: {label}', fontsize=14)
    plt.ylabel('Score', fontsize=14)
    plt.xlabel('epoch')
    plt.legend(['loss', 'val_loss'], loc='best')
    plt.show()
    # chart 4
    plt.figure(figsize= (10,7))
    plt.plot(history['recall'])
    plt.plot(history['val_recall'])
    plt.title(f'Model Recall Summary: {label}', fontsize=14)
    plt.ylabel('recall', fontsize=14)
    plt.xlabel('epoch', fontsize=14)
    plt.legend(['recall', 'val_recall'], loc='best')
    plt.show()
def GetModelStats(model):
    """Return summary statistics (count/mean/std/quartiles) of a trained
    model's per-epoch history DataFrame, as stored under 'History'."""
    return model['History'].describe()
def GetModelAccuracy(model=None, x_test=None, y_true=None):
    """Return test-set accuracy for a fitted classifier.

    Backward-compatible generalization: the original took no arguments and
    referenced `model_3`, `X_test`, and `normal_y_test` — names that are
    never defined in this file, so the call raised NameError. With no
    arguments this version falls back to those same globals (preserving the
    original behavior, including the NameError); pass explicit arguments to
    evaluate any model/data instead.
    """
    if model is None:
        model = model_3            # original global fallback (undefined here)
    if x_test is None:
        x_test = X_test            # original global fallback (undefined here)
    if y_true is None:
        y_true = normal_y_test     # original global fallback (undefined here)
    predicted_probabilities = model.predict(x_test)
    predicted_classes = np.argmax(predicted_probabilities, axis=1)
    return accuracy_score(y_true, predicted_classes)
def Scores(dic):
    # NOTE(review): exact duplicate of the Scores definition earlier in this
    # file; this redefinition shadows it with identical behavior.
    """Return the score dictionary as a transposed DataFrame."""
    return pd.DataFrame.from_dict(dic).T
model_data = CreateAndGetModelData(main_data)
===============Input Data=============== X Data Shape: (251, 128, 128, 3) Y Data Shape: (251, 1) ===============Split Data=================== x_train shape: (200, 128, 128, 3), y_train shape: (200, 1) x_test shape: (51, 128, 128, 3), y_test shape: (51, 1) ==============Reshape the Data============= x_train shape: (200, 128, 128, 3), y_train shape: (200, 1) x_test shape: (51, 128, 128, 3), y_test shape: (51, 1) ============Normalize the Data=============== x_train_normalized shape: (200, 128, 128, 3) x_test_normalized shape: (51, 128, 128, 3) ==========Encode Target Variables============ y_train_encoded shape: (200, 3) y_test_encoded shape: (51, 3)
# Train the base CNN on the raw (unblurred) data and record its scores.
model_results = GetBaseModel(
    current_optimiser = "adam",
    data = model_data,
    activation_funtion = 'relu',
    scores = model_evaluation_scores)
model_evaluation_scores = model_results['Stats']
clear_output()
model_results['History'].describe().T
| count | mean | std | min | 25% | 50% | 75% | max | |
|---|---|---|---|---|---|---|---|---|
| loss | 50.0 | 0.130620 | 0.207884 | 0.003735 | 0.011523 | 0.041565 | 0.139558 | 1.059983 |
| accuracy | 50.0 | 0.948125 | 0.089395 | 0.568750 | 0.957813 | 0.984375 | 0.993750 | 1.000000 |
| false_positives | 50.0 | 5.120000 | 7.144685 | 0.000000 | 0.250000 | 2.000000 | 6.750000 | 36.000000 |
| true_positives | 50.0 | 148.740000 | 19.677823 | 71.000000 | 152.500000 | 157.500000 | 159.000000 | 160.000000 |
| recall | 50.0 | 0.929625 | 0.122986 | 0.443750 | 0.953125 | 0.984375 | 0.993750 | 1.000000 |
| val_loss | 50.0 | 0.767058 | 0.371617 | 0.196008 | 0.383188 | 0.872988 | 1.074057 | 1.308286 |
| val_accuracy | 50.0 | 0.664000 | 0.190873 | 0.375000 | 0.500000 | 0.650000 | 0.850000 | 0.950000 |
| val_false_positives | 50.0 | 7.140000 | 6.848238 | 0.000000 | 2.000000 | 5.000000 | 12.750000 | 22.000000 |
| val_true_positives | 50.0 | 20.920000 | 13.127071 | 0.000000 | 12.500000 | 23.500000 | 33.000000 | 36.000000 |
| val_recall | 50.0 | 0.523000 | 0.328177 | 0.000000 | 0.312500 | 0.587500 | 0.825000 | 0.900000 |
y_predictions = GetModelPredictions(model_results['Model'], model_data)
2/2 [==============================] - 0s 75ms/step
precision recall f1-score support
0 1.00 0.71 0.83 14
1 0.79 1.00 0.88 19
2 0.94 0.89 0.91 18
micro avg 0.88 0.88 0.88 51
macro avg 0.91 0.87 0.88 51
weighted avg 0.90 0.88 0.88 51
samples avg 0.88 0.88 0.88 51
accuracy = model_results['Model'].evaluate(model_data['x_test'], model_data['y_test'], verbose=2)
2/2 - 0s - loss: 0.3370 - accuracy: 0.8824 - false_positives: 6.0000 - true_positives: 45.0000 - recall: 0.8824 - 212ms/epoch - 106ms/step
# Confusion matrix plus accuracy/loss/recall training curves (raw data run).
ShowModelStats(
    history = model_results['History'],
    actual_y = model_data['y_test'],
    predictions_y = y_predictions,
    label = "Default Data - Random Initialisation with Adam",
    encoder = model_data['encoder'])
def VisualPredictions(model, data):
    """Display a fixed handful of test images with predicted vs. true labels.

    Bug fix: the original read the module-level `model_data` for both the
    encoder and the displayed images instead of the `data` parameter, so
    passing a different dataset silently showed/encoded the wrong data.
    All lookups now go through `data`.
    """
    enc = data['encoder']
    test_indices = [2, 3, 33, 36, 50]  # fixed sample of test-set positions
    for index in test_indices:
        plt.figure(figsize=(2,2))
        plt.imshow(data['x_test'][index])
        plt.show()
        print('Predicted Label', enc.inverse_transform(model.predict((data['x_test'][index].reshape(1,128,128,3)))))
        print('True Label', enc.inverse_transform(data['y_test'])[index])
VisualPredictions(model_results['Model'], model_data)
1/1 [==============================] - 0s 17ms/step Predicted Label ['Normal'] True Label Normal
1/1 [==============================] - 0s 17ms/step Predicted Label ['Covid'] True Label Covid
1/1 [==============================] - 0s 16ms/step Predicted Label ['Normal'] True Label Normal
1/1 [==============================] - 0s 16ms/step Predicted Label ['Viral Pneumonia'] True Label Viral Pneumonia
1/1 [==============================] - 0s 16ms/step Predicted Label ['Normal'] True Label Normal
# Rebuild the model data with Gaussian-blurred images.
model_data = CreateAndGetModelData(
    data = main_data,
    transformer = Blur,
    should_transform = True)
===============Input Data=============== X Data Shape: (251, 128, 128, 3) Y Data Shape: (251, 1) ===============Split Data=================== x_train shape: (200, 128, 128, 3), y_train shape: (200, 1) x_test shape: (51, 128, 128, 3), y_test shape: (51, 1) ==============Reshape the Data============= x_train shape: (200, 128, 128, 3), y_train shape: (200, 1) x_test shape: (51, 128, 128, 3), y_test shape: (51, 1) ============Normalize the Data=============== x_train_normalized shape: (200, 128, 128, 3) x_test_normalized shape: (51, 128, 128, 3) ==========Encode Target Variables============ y_train_encoded shape: (200, 3) y_test_encoded shape: (51, 3)
# Retrain the same architecture on the blurred data and record its scores.
model_results = GetBaseModel(
    current_optimiser = "adam",
    data = model_data,
    activation_funtion = 'relu',
    scores = model_evaluation_scores)
model_evaluation_scores = model_results['Stats']
clear_output()
model_results['History'].describe().T
| count | mean | std | min | 25% | 50% | 75% | max | |
|---|---|---|---|---|---|---|---|---|
| loss | 50.0 | 0.137407 | 0.199740 | 0.004470 | 0.043637 | 0.066708 | 0.147894 | 1.176347 |
| accuracy | 50.0 | 0.945500 | 0.083910 | 0.518750 | 0.939063 | 0.975000 | 0.985938 | 1.000000 |
| false_positives | 50.0 | 6.480000 | 8.595775 | 0.000000 | 2.000000 | 3.000000 | 8.750000 | 49.000000 |
| true_positives | 50.0 | 149.740000 | 17.008773 | 65.000000 | 150.250000 | 156.000000 | 157.000000 | 160.000000 |
| recall | 50.0 | 0.935875 | 0.106305 | 0.406250 | 0.939063 | 0.975000 | 0.981250 | 1.000000 |
| val_loss | 50.0 | 0.507002 | 0.224672 | 0.188394 | 0.310187 | 0.502188 | 0.662393 | 1.059134 |
| val_accuracy | 50.0 | 0.845000 | 0.103016 | 0.500000 | 0.825000 | 0.875000 | 0.918750 | 0.950000 |
| val_false_positives | 50.0 | 2.280000 | 1.340728 | 0.000000 | 1.000000 | 3.000000 | 3.000000 | 5.000000 |
| val_true_positives | 50.0 | 28.800000 | 9.441550 | 0.000000 | 24.500000 | 32.500000 | 35.000000 | 37.000000 |
| val_recall | 50.0 | 0.720000 | 0.236039 | 0.000000 | 0.612500 | 0.812500 | 0.875000 | 0.925000 |
y_predictions = GetModelPredictions(model_results['Model'], model_data)
2/2 [==============================] - 0s 68ms/step
precision recall f1-score support
0 0.88 1.00 0.93 14
1 1.00 0.63 0.77 19
2 0.89 0.94 0.92 18
micro avg 0.91 0.84 0.88 51
macro avg 0.92 0.86 0.88 51
weighted avg 0.93 0.84 0.87 51
samples avg 0.84 0.84 0.84 51
accuracy = model_results['Model'].evaluate(model_data['x_test'], model_data['y_test'], verbose=2)
2/2 - 0s - loss: 0.3654 - accuracy: 0.8627 - false_positives: 4.0000 - true_positives: 43.0000 - recall: 0.8431 - 205ms/epoch - 102ms/step
# Plots and visual spot checks for the blurred-data model.
ShowModelStats(
    history = model_results['History'],
    actual_y = model_data['y_test'],
    predictions_y = y_predictions,
    label = "Default Data - Random Initialisation with Adam",
    encoder = model_data['encoder'])
VisualPredictions(model_results['Model'], model_data)
1/1 [==============================] - 0s 17ms/step Predicted Label ['Normal'] True Label Normal
1/1 [==============================] - 0s 18ms/step Predicted Label ['Covid'] True Label Covid
1/1 [==============================] - 0s 17ms/step Predicted Label ['Covid'] True Label Normal
1/1 [==============================] - 0s 17ms/step Predicted Label ['Viral Pneumonia'] True Label Viral Pneumonia
1/1 [==============================] - 0s 17ms/step Predicted Label ['Normal'] True Label Normal
Scores(model_evaluation_scores)
| 0 | 1 | |
|---|---|---|
| Hidden Layer Activation | relu | relu |
| Output Layer Activation | Softmax | Softmax |
| Loss | binary_crossentropy | binary_crossentropy |
| Weight Initialisation | random_normal | random_normal |
| Max Accuracy Train | 1.0 | 1.0 |
| Max Accuracy Test | 0.95 | 0.95 |
| Max Recall Train | 1.0 | 1.0 |
| Max Recall Test | 0.9 | 0.925 |